bitkeeper revision 1.1236.1.113 (42407388qtYnzMaBpNXqBANV55c6Qw)
author kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 22 Mar 2005 19:35:36 +0000 (19:35 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 22 Mar 2005 19:35:36 +0000 (19:35 +0000)
Manual merge.

1  2 
xen/arch/x86/domain.c
xen/common/page_alloc.c
xen/common/schedule.c

Simple merge
index 9879616e15e2a70d1a4444f5838e6cc13d72d865,8637b8cea5cd85d1880815bbae66500b42220160..026217c5afcaa1f85019517e0e6ccb54a0b2b869
@@@ -29,8 -28,8 +29,9 @@@
  #include <xen/spinlock.h>
  #include <xen/slab.h>
  #include <xen/irq.h>
+ #include <xen/softirq.h>
  #include <asm/domain_page.h>
 +#include <asm/page.h>
  
  /*
   * Comma-separated list of hexadecimal page numbers containing bad bytes.
@@@ -545,10 -551,7 +546,9 @@@ struct pfn_info *alloc_domheap_pages(st
  void free_domheap_pages(struct pfn_info *pg, unsigned int order)
  {
      int            i, drop_dom_ref;
 -    struct domain *d = pg->u.inuse.domain;
 +    struct domain *d = page_get_owner(pg);
 +    struct exec_domain *ed;
-     void          *p;
 +    int cpu_mask = 0;
  
      ASSERT(!in_irq());
  
          {
              ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
              pg[i].tlbflush_timestamp  = tlbflush_current_time();
 -            pg[i].u.free.cpu_mask     = 1 << d->processor;
 +            pg[i].u.free.cpu_mask     = cpu_mask;
              list_del(&pg[i].list);
+         }
+         d->tot_pages -= 1 << order;
+         drop_dom_ref = (d->tot_pages == 0);
+         spin_unlock_recursive(&d->page_alloc_lock);
  
 -        if ( likely(!test_bit(DF_DYING, &d->flags)) )
++        if ( likely(!test_bit(DF_DYING, &d->d_flags)) )
+         {
+             free_heap_pages(MEMZONE_DOM, pg, order);
+         }
+         else
+         {
              /*
               * Normally we expect a domain to clear pages before freeing them,
               * if it cares about the secrecy of their contents. However, after
@@@ -617,12 -622,62 +622,72 @@@ unsigned long avail_domheap_pages(void
      return avail[MEMZONE_DOM];
  }
  
+ /*************************
+  * PAGE SCRUBBING
+  */
+ static spinlock_t page_scrub_lock;
+ struct list_head page_scrub_list;
+ static void page_scrub_softirq(void)
+ {
+     struct list_head *ent;
+     struct pfn_info  *pg;
+     void             *p;
+     int               i;
+     s_time_t          start = NOW();
+     /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+     do {
+         spin_lock(&page_scrub_lock);
+         if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
+         {
+             spin_unlock(&page_scrub_lock);
+             return;
+         }
+         
+         /* Peel up to 16 pages from the list. */
+         for ( i = 0; i < 16; i++ )
+             if ( (ent = ent->next) == &page_scrub_list )
+                 break;
+         
+         /* Remove peeled pages from the list. */
+         ent->next->prev = &page_scrub_list;
+         page_scrub_list.next = ent->next;
+         
+         spin_unlock(&page_scrub_lock);
+         
+         /* Working backwards, scrub each page in turn. */
+         while ( ent != &page_scrub_list )
+         {
+             pg = list_entry(ent, struct pfn_info, list);
+             ent = ent->prev;
+             p = map_domain_mem(page_to_phys(pg));
+             clear_page(p);
+             unmap_domain_mem(p);
+             free_heap_pages(MEMZONE_DOM, pg, 0);
+         }
+     } while ( (NOW() - start) < MILLISECS(1) );
+ }
+ static __init int page_scrub_init(void)
+ {
+     spin_lock_init(&page_scrub_lock);
+     INIT_LIST_HEAD(&page_scrub_list);
+     open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
+     return 0;
+ }
+ __initcall(page_scrub_init);
++
 +/*
 + * Local variables:
 + * mode: C
 + * c-set-style: "BSD"
 + * c-basic-offset: 4
 + * tab-width: 4
 + * indent-tabs-mode: nil
 + * End:
 + */
index 177a322d75c5763013a58e409adfbcf51a02fd15,d16c2192f4dc8ffe8860fec5b3849283cfb235d5..562daa6132538b1ae5248f239fa17a397bcef5e5
@@@ -483,11 -434,13 +483,13 @@@ static void t_timer_fn(unsigned long un
  
      TRACE_0D(TRC_SCHED_T_TIMER_FN);
  
 -    if ( !is_idle_task(d) && update_dom_time(d) )
 -        send_guest_virq(d, VIRQ_TIMER);
 +    if ( !is_idle_task(ed->domain) && update_dom_time(ed) )
 +        send_guest_virq(ed, VIRQ_TIMER);
  
 -    t_timer[d->processor].expires = NOW() + MILLISECS(10);
 -    add_ac_timer(&t_timer[d->processor]);
+     page_scrub_schedule_work();
 +    t_timer[ed->processor].expires = NOW() + MILLISECS(10);
 +    add_ac_timer(&t_timer[ed->processor]);
  }
  
  /* Domain timer function, sends a virtual timer interrupt to domain */